from bigflow import base
from bigflow import transforms
from bigflow import input
from bigflow import output
def cal_field_sum(line):
"""for top_field:
a[0] is current field, b[0] is next field
OR
a[0] is last field, b[0] is current field
"""
return line.reduce(lambda a, b: [int(a[0]) + int(b[0])] + b[1:])
def one_filter(arr, valid):
    """Pass *arr* through unless its third field equals *valid*.

    Args:
        arr: list of fields (strings, produced by splitting a TSV line).
        valid: marker value; records whose third field equals it are dropped.

    Returns:
        ``arr`` when ``arr[2] != valid``, else ``None`` (the downstream
        ``filter(... is not None)`` removes the ``None`` records).

    NOTE(review): ``arr[2]`` is a string (it comes from ``split('\\t')``)
    while the caller passes ``valid=0`` (an int), so the comparison is
    always true and nothing is ever dropped — confirm whether
    ``valid='0'`` was intended before changing it.
    """
    if arr[2] != valid:
        return arr
    return None  # explicit, rather than falling off the end
# Driver: read TSV lines, drop records flagged by one_filter, group by the
# first field, sum each group's first value column (cal_field_sum), keep the
# top `wanted_num` groups by that sum, and write the result back out as TSV.
#
# Fixes vs. original:
#   * removed the stray '}' in `one_filter(arr, valid=0})` — a syntax error;
#   * replaced Python-2-only tuple-parameter lambdas
#     (`lambda (k, arr): ...`) with subscripting, which parses on both
#     Python 2 and 3 with identical behavior.
res_pipe = []
wanted_num = '2'        # number of top groups to keep (as a string; int()'d below)
input_path = './data'
output_path = './out'

pipeline = base.Pipeline.create('LOCAL')
tmp_pipe = pipeline.read(input.TextFile(input_path)) \
    .map(lambda line: line.rstrip().split('\t')) \
    .map(lambda arr: one_filter(arr, valid=0)) \
    .filter(lambda arr: arr is not None) \
    .group_by(lambda arr: arr[0], lambda arr: arr[1:]) \
    .apply_values(cal_field_sum) \
    .flatten() \
    .max_elements(int(wanted_num), key=lambda kv: int(kv[1][0])) \
    .map(lambda kv: [kv[0], str(kv[1][0])] + kv[1][1:]) \
    .map(lambda arr: '\t'.join(arr))
res_pipe.append(tmp_pipe)

res = transforms.union(*res_pipe)
# Two output partitions under output_path.
pipeline.write(res, output.TextFile(output_path).partition(2))
pipeline.run()